event.bytes = 0;
event.fields.v = 1;
- event.fields.type = EVENTTYPE_NMI;
+ event.fields.type = X86_EVENTTYPE_NMI;
event.fields.vector = 2;
ASSERT(vmcb->eventinj.fields.v == 0);
event.bytes = 0;
event.fields.v = 1;
- event.fields.type = EVENTTYPE_INTR;
+ event.fields.type = X86_EVENTTYPE_EXT_INTR;
event.fields.vector = vector;
ASSERT(vmcb->eventinj.fields.v == 0);
/* hardware assisted paging bits */
extern int opt_hap_enabled;
-static void svm_inject_exception(struct vcpu *v, int trap,
- int ev, int error_code)
+static void svm_inject_exception(
+ struct vcpu *v, int trap, int ev, int error_code)
{
eventinj_t event;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
event.bytes = 0;
event.fields.v = 1;
- event.fields.type = EVENTTYPE_EXCEPTION;
+ event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
event.fields.vector = trap;
event.fields.ev = ev;
event.fields.errorcode = error_code;
* Clear NMI-blocking interruptibility info if an NMI delivery
* faulted. Re-delivery will re-set it (see SDM 3B 25.7.1.2).
*/
- if ( (idtv_info_field&INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI )
+ if ( (idtv_info_field&INTR_INFO_INTR_TYPE_MASK) ==
+ (X86_EVENTTYPE_NMI << 8) )
__vmwrite(GUEST_INTERRUPTIBILITY_INFO,
__vmread(GUEST_INTERRUPTIBILITY_INFO) &
~VMX_INTR_SHADOW_NMI);
vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
break;
case TRAP_nmi:
- if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) != INTR_TYPE_NMI )
+ if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) !=
+ (X86_EVENTTYPE_NMI << 8) )
goto exit_and_crash;
HVMTRACE_0D(NMI, v);
vmx_store_cpu_guest_regs(v, regs, NULL);
/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK (1U << TRAP_machine_check)
/*
 * x86 event types. This enumeration is valid for:
 *  Intel VMX: {VM_ENTRY,VM_EXIT,IDT_VECTORING}_INTR_INFO[10:8]
 *  AMD SVM: eventinj[10:8] and exitintinfo[10:8] (types 0-4 only)
 */
#define X86_EVENTTYPE_EXT_INTR              0    /* external interrupt */
#define X86_EVENTTYPE_NMI                   2    /* NMI */
#define X86_EVENTTYPE_HW_EXCEPTION          3    /* hardware exception */
#define X86_EVENTTYPE_SW_INTERRUPT          4    /* software interrupt */
#define X86_EVENTTYPE_SW_EXCEPTION          6    /* software exception */

static inline int hvm_cpu_up(void)
{
if ( hvm_funcs.cpu_up )
} fields;
} __attribute__ ((packed)) eventinj_t;
-enum EVENTTYPES
-{
- EVENTTYPE_INTR = 0,
- EVENTTYPE_NMI = 2,
- EVENTTYPE_EXCEPTION = 3,
- EVENTTYPE_SWINT = 4,
-};
-
typedef union
{
u64 bytes;
#define INTR_INFO_VALID_MASK 0x80000000 /* 31 */
#define INTR_INFO_RESVD_BITS_MASK 0x7ffff000
-#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */
-#define INTR_TYPE_NMI (2 << 8) /* NMI */
-#define INTR_TYPE_HW_EXCEPTION (3 << 8) /* hardware exception */
-#define INTR_TYPE_SW_EXCEPTION (6 << 8) /* software exception */
-
/*
* Exit Qualifications for MOV for Control Register Access
*/
* VM entry]", PRM Vol. 3, 22.6.1 (Interruptibility State).
*/
- intr_fields = (INTR_INFO_VALID_MASK | type | trap);
+ intr_fields = (INTR_INFO_VALID_MASK | (type<<8) | trap);
if ( error_code != VMX_DELIVER_NO_ERROR_CODE ) {
__vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
intr_fields |= INTR_INFO_DELIVER_CODE_MASK;
struct vcpu *v, int trap, int error_code)
{
v->arch.hvm_vmx.vector_injected = 1;
- __vmx_inject_exception(v, trap, INTR_TYPE_HW_EXCEPTION, error_code);
+ __vmx_inject_exception(v, trap, X86_EVENTTYPE_HW_EXCEPTION, error_code);
}
/*
 * Inject an external interrupt (vector 'trap') into the guest via the
 * VM-entry interruption-information field. External interrupts carry
 * no error code, hence VMX_DELIVER_NO_ERROR_CODE.
 */
static inline void vmx_inject_extint(struct vcpu *v, int trap)
{
    __vmx_inject_exception(v, trap, X86_EVENTTYPE_EXT_INTR,
                           VMX_DELIVER_NO_ERROR_CODE);
}
/*
 * Inject an NMI into the guest. NMIs are always vector 2 and never
 * carry an error code.
 */
static inline void vmx_inject_nmi(struct vcpu *v)
{
    __vmx_inject_exception(v, 2, X86_EVENTTYPE_NMI,
                           VMX_DELIVER_NO_ERROR_CODE);
}